# This is a BitKeeper generated patch for the following project: # Project Name: Linux kernel tree # This patch format is intended for GNU patch command version 2.5 or higher. # This patch includes the following deltas: # ChangeSet 1.884.35.22+1.884.32.1 -> 1.884.38.1 # drivers/char/drm/drm_agpsupport.h 1.10.1.3 -> 1.13 # Makefile 1.193.1.9 -> 1.193.1.10 # drivers/char/agp/agpgart_be.c 1.35.1.7 -> 1.41.1.10 # drivers/char/Config.in 1.36.1.7 -> 1.39.1.4 # include/linux/agp_backend.h 1.13.1.2 -> 1.17 # drivers/net/Makefile 1.29.1.1 -> 1.31 # arch/i386/kernel/mpparse.c 1.9.2.9 -> 1.9.4.2 # arch/i386/config.in 1.34.2.4 -> 1.34.1.5 # arch/i386/kernel/io_apic.c 1.17.2.6 -> 1.17.4.2 # drivers/char/Makefile 1.27.1.3 -> 1.30 # include/linux/pci_ids.h 1.44.1.9 -> 1.46.1.5 # arch/i386/kernel/setup.c 1.50.2.5 -> 1.50.3.4 # drivers/char/drm-4.0/i810_dma.c 1.3.1.1 -> 1.5 # drivers/net/eepro100.c 1.37.1.3 -> 1.37.1.4 # drivers/char/drm-4.0/agpsupport.c 1.1.1.2 -> 1.6 # Documentation/Configure.help 1.128.13.5 -> 1.128.13.6 # diff -Nru a/Documentation/Configure.help b/Documentation/Configure.help --- a/Documentation/Configure.help Wed Oct 8 09:08:22 2003 +++ b/Documentation/Configure.help Wed Oct 8 09:08:22 2003 @@ -11684,6 +11684,18 @@ module, say M here and read as well as . +AMD 8111 (new PCI lance) support +CONFIG_AMD8111_ETH + If you have an AMD 8111-based PCI lance ethernet card, + answer Y here and read the Ethernet-HOWTO, available from + . + + This driver is also available as a module ( = code which can be + inserted in and removed from the running kernel whenever you want). + The module will be called amd8111e.o. If you want to compile it as a + module, say M here and read as well + as . + Ansel Communications EISA 3200 support CONFIG_AC3200 If you have a network (Ethernet) card of this type, say Y and read @@ -14010,10 +14022,10 @@ The module will be called visor.o. If you want to compile it as a module, say M here and read . -USB Compaq iPAQ Driver +USB PocketPC PDA Driver CONFIG_USB_SERIAL_IPAQ - Say Y here if you want to connect to your Compaq iPAQ, HP Jornada 548/568 - or Casio EM500 running Windows CE 3.0 or PocketPC 2002 using a USB + Say Y here if you want to connect to your Compaq iPAQ, HP Jornada, + or any other PDA running Windows CE 3.0 or PocketPC 2002 using a USB cradle/cable. For information on using the driver, read . @@ -14427,7 +14439,7 @@ If in doubt then look at linux/drivers/usb/pegasus.h for the complete list of supported devices. If your particular adapter is not in the list and you are _sure_ it - is Pegasus or Pegasus-II based then send me (pmanolov@users.sourceforge.net) + is Pegasus or Pegasus-II based then send me (petkan@users.sourceforge.net) vendor and device IDs. 
This code is also available as a module ( = code which can be diff -Nru a/Makefile b/Makefile --- a/Makefile Wed Oct 8 09:08:22 2003 +++ b/Makefile Wed Oct 8 09:08:22 2003 @@ -138,8 +138,7 @@ DRIVERS-y += drivers/char/char.o \ drivers/block/block.o \ drivers/misc/misc.o \ - drivers/net/net.o \ - drivers/media/media.o + drivers/net/net.o DRIVERS-$(CONFIG_AGP) += drivers/char/agp/agp.o DRIVERS-$(CONFIG_DRM_NEW) += drivers/char/drm/drm.o DRIVERS-$(CONFIG_DRM_OLD) += drivers/char/drm-4.0/drm.o @@ -180,6 +179,7 @@ DRIVERS-$(CONFIG_HAMRADIO) += drivers/net/hamradio/hamradio.o DRIVERS-$(CONFIG_TC) += drivers/tc/tc.a DRIVERS-$(CONFIG_USB) += drivers/usb/usbdrv.o +DRIVERS-y +=drivers/media/media.o DRIVERS-$(CONFIG_INPUT) += drivers/input/inputdrv.o DRIVERS-$(CONFIG_HIL) += drivers/hil/hil.o DRIVERS-$(CONFIG_I2O) += drivers/message/i2o/i2o.o diff -Nru a/arch/i386/config.in b/arch/i386/config.in --- a/arch/i386/config.in Wed Oct 8 09:08:22 2003 +++ b/arch/i386/config.in Wed Oct 8 09:08:22 2003 @@ -311,14 +311,6 @@ bool 'Power Management support' CONFIG_PM -if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then - dep_bool ' ACPI support' CONFIG_ACPI $CONFIG_PM - - if [ "$CONFIG_ACPI" != "n" ]; then - source drivers/acpi/Config.in - fi -fi - dep_tristate ' Advanced Power Management BIOS support' CONFIG_APM $CONFIG_PM if [ "$CONFIG_APM" != "n" ]; then bool ' Ignore USER SUSPEND' CONFIG_APM_IGNORE_USER_SUSPEND @@ -329,6 +321,8 @@ bool ' Allow interrupts during APM BIOS calls' CONFIG_APM_ALLOW_INTS bool ' Use real mode APM BIOS call to power off' CONFIG_APM_REAL_MODE_POWER_OFF fi + +source drivers/acpi/Config.in endmenu diff -Nru a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c --- a/arch/i386/kernel/io_apic.c Wed Oct 8 09:08:22 2003 +++ b/arch/i386/kernel/io_apic.c Wed Oct 8 09:08:22 2003 @@ -17,6 +17,7 @@ * thanks to Eric Gilmore * and Rolf G. Tews * for testing these extensively + * Paul Diefenbaugh : Added full ACPI support */ #include @@ -28,6 +29,7 @@ #include #include #include +#include #include #include @@ -1054,6 +1056,10 @@ unsigned char old_id; unsigned long flags; + if (acpi_ioapic) + /* This gets done during IOAPIC enumeration for ACPI. */ + return; + if (clustered_apic_mode) /* We don't have a good way to do this yet - hack */ phys_id_present_map = (u_long) 0xf; @@ -1654,8 +1660,7 @@ printk("ENABLING IO-APIC IRQs\n"); /* - * Set up the IO-APIC IRQ routing table by parsing the MP-BIOS - * mptable: + * Set up IO-APIC IRQ routing. */ setup_ioapic_ids_from_mpc(); sync_Arb_IDs(); @@ -1664,3 +1669,159 @@ check_timer(); print_IO_APIC(); } + + +/* -------------------------------------------------------------------------- + ACPI-based IOAPIC Configuration + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_BOOT + +#define IO_APIC_MAX_ID 15 + +int __init io_apic_get_unique_id (int ioapic, int apic_id) +{ + struct IO_APIC_reg_00 reg_00; + static unsigned long apic_id_map = 0; + unsigned long flags; + int i = 0; + + /* + * The P4 platform supports up to 256 APIC IDs on two separate APIC + * buses (one for LAPICs, one for IOAPICs), where predecessors only + * supports up to 16 on one shared APIC bus. + * + * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full + * advantage of new APIC bus architecture. 
+ */ + + if (!apic_id_map) + apic_id_map = phys_cpu_present_map; + + spin_lock_irqsave(&ioapic_lock, flags); + *(int *)®_00 = io_apic_read(ioapic, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + if (apic_id >= IO_APIC_MAX_ID) { + printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " + "%d\n", ioapic, apic_id, reg_00.ID); + apic_id = reg_00.ID; + } + + /* + * Every APIC in a system must have a unique ID or we get lots of nice + * 'stuck on smp_invalidate_needed IPI wait' messages. + */ + if (apic_id_map & (1 << apic_id)) { + + for (i = 0; i < IO_APIC_MAX_ID; i++) { + if (!(apic_id_map & (1 << i))) + break; + } + + if (i == IO_APIC_MAX_ID) + panic("Max apic_id exceeded!\n"); + + printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " + "trying %d\n", ioapic, apic_id, i); + + apic_id = i; + } + + apic_id_map |= (1 << apic_id); + + if (reg_00.ID != apic_id) { + reg_00.ID = apic_id; + + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic, 0, *(int *)®_00); + *(int *)®_00 = io_apic_read(ioapic, 0); + spin_unlock_irqrestore(&ioapic_lock, flags); + + /* Sanity check */ + if (reg_00.ID != apic_id) + panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic); + } + + printk(KERN_INFO "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); + + return apic_id; +} + + +int __init io_apic_get_version (int ioapic) +{ + struct IO_APIC_reg_01 reg_01; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + *(int *)®_01 = io_apic_read(ioapic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.version; +} + + +int __init io_apic_get_redir_entries (int ioapic) +{ + struct IO_APIC_reg_01 reg_01; + unsigned long flags; + + spin_lock_irqsave(&ioapic_lock, flags); + *(int *)®_01 = io_apic_read(ioapic, 1); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return reg_01.entries; +} + + +int io_apic_set_pci_routing (int ioapic, int pin, int irq) +{ + struct IO_APIC_route_entry entry; + unsigned long flags; + + if (!IO_APIC_IRQ(irq)) { + printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0/n", + ioapic); + return -EINVAL; + } + + /* + * Generate a PCI IRQ routing entry and program the IOAPIC accordingly. + * Note that we mask (disable) IRQs now -- these get enabled when the + * corresponding device driver registers for this IRQ. + */ + + memset(&entry,0,sizeof(entry)); + + entry.delivery_mode = dest_LowestPrio; + entry.dest_mode = INT_DELIVERY_MODE; + entry.dest.logical.logical_dest = TARGET_CPUS; + entry.mask = 1; /* Disabled (masked) */ + entry.trigger = 1; /* Level sensitive */ + entry.polarity = 1; /* Low active */ + + add_pin_to_irq(irq, ioapic, pin); + + entry.vector = assign_irq_vector(irq); + + printk(KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> " + "IRQ %d)\n", ioapic, + mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq); + + irq_desc[irq].handler = &ioapic_level_irq_type; + + set_intr_gate(entry.vector, interrupt[irq]); + + if (!ioapic && (irq < 16)) + disable_8259A_irq(irq); + + spin_lock_irqsave(&ioapic_lock, flags); + io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1)); + io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0)); + spin_unlock_irqrestore(&ioapic_lock, flags); + + return entry.vector; +} + +#endif /*CONFIG_ACPI_BOOT*/ diff -Nru a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c --- a/arch/i386/kernel/mpparse.c Wed Oct 8 09:08:22 2003 +++ b/arch/i386/kernel/mpparse.c Wed Oct 8 09:08:22 2003 @@ -9,12 +9,14 @@ * Erich Boleyn : MP v1.4 and additional changes. 
* Alan Cox : Added EBDA scanning * Ingo Molnar : various cleanups and rewrites - * Maciej W. Rozycki : Bits for default MP configurations + * Maciej W. Rozycki: Bits for default MP configurations + * Paul Diefenbaugh: Added full ACPI support */ #include #include #include +#include #include #include #include @@ -23,9 +25,11 @@ #include #include +#include #include #include #include +#include #include /* Have we found an MP table */ @@ -135,12 +139,6 @@ return n; } -#ifdef CONFIG_X86_IO_APIC -extern int have_acpi_tables; /* set by acpitable.c */ -#else -#define have_acpi_tables (0) -#endif - /* * Have to match translation table entries to main table entries by counter * hence the mpc_record variable .... can't see a less disgusting way of @@ -445,10 +443,11 @@ printk("APIC at: 0x%lX\n",mpc->mpc_lapic); - /* save the local APIC address, it might be non-default, - * but only if we're not using the ACPI tables + /* + * Save the local APIC address (it might be non-default) -- but only + * if we're not using ACPI. */ - if (!have_acpi_tables) + if (!acpi_lapic) mp_lapic_addr = mpc->mpc_lapic; if ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) && mpc->mpc_oemptr) { @@ -528,9 +527,8 @@ { struct mpc_config_processor *m= (struct mpc_config_processor *)mpt; - - /* ACPI may already have provided this one for us */ - if (!have_acpi_tables) + /* ACPI may have already provided this data */ + if (!acpi_lapic) MP_processor_info(m); mpt += sizeof(*m); count += sizeof(*m); @@ -759,7 +757,6 @@ } static struct intel_mp_floating *mpf_found; -extern void config_acpi_tables(void); /* * Scan the memory blocks for an SMP configuration block. @@ -768,17 +765,19 @@ { struct intel_mp_floating *mpf = mpf_found; -#ifdef CONFIG_X86_IO_APIC /* - * Check if the ACPI tables are provided. Use them only to get - * the processor information, mainly because it provides - * the info on the logical processor(s), rather than the physical - * processor(s) that are provided by the MPS. We attempt to - * check only if the user provided a commandline override + * ACPI may be used to obtain the entire SMP configuration or just to + * enumerate/configure processors (CONFIG_ACPI_HT_ONLY). Note that + * ACPI supports both logical (e.g. Hyper-Threading) and physical + * processors, where MPS only supports physical. 
*/ - config_acpi_tables(); -#endif - + if (acpi_lapic && acpi_ioapic) { + printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n"); + return; + } + else if (acpi_lapic) + printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n"); + printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification); if (mpf->mpf_feature2 & (1<<7)) { printk(" IMCR and PIC compatibility mode.\n"); @@ -900,7 +899,7 @@ address = *(unsigned short *)phys_to_virt(0x40E); address <<= 4; - smp_scan_config(address, 0x1000); + smp_scan_config(address, 0x400); } #else @@ -937,3 +936,315 @@ #endif } + +/* -------------------------------------------------------------------------- + ACPI-based MP Configuration + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_BOOT + +void __init mp_register_lapic_address ( + u64 address) +{ + mp_lapic_addr = (unsigned long) address; + + set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); + + if (boot_cpu_physical_apicid == -1U) + boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); + + Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid); +} + + +void __init mp_register_lapic ( + u8 id, + u8 enabled) +{ + struct mpc_config_processor processor; + int boot_cpu = 0; + + if (id >= MAX_APICS) { + printk(KERN_WARNING "Processor #%d invalid (max %d)\n", + id, MAX_APICS); + return; + } + + if (id == boot_cpu_physical_apicid) + boot_cpu = 1; + + processor.mpc_type = MP_PROCESSOR; + processor.mpc_apicid = id; + processor.mpc_apicver = 0x10; /* TBD: lapic version */ + processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0); + processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0); + processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) | + (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask; + processor.mpc_featureflag = boot_cpu_data.x86_capability[0]; + processor.mpc_reserved[0] = 0; + processor.mpc_reserved[1] = 0; + + MP_processor_info(&processor); +} + +#ifdef CONFIG_X86_IO_APIC + +#define MP_ISA_BUS 0 +#define MP_MAX_IOAPIC_PIN 127 + +struct mp_ioapic_routing { + int apic_id; + int irq_start; + int irq_end; + u32 pin_programmed[4]; +} mp_ioapic_routing[MAX_IO_APICS]; + + +static int __init mp_find_ioapic ( + int irq) +{ + int i = 0; + + /* Find the IOAPIC that manages this IRQ. */ + for (i = 0; i < nr_ioapics; i++) { + if ((irq >= mp_ioapic_routing[i].irq_start) + && (irq <= mp_ioapic_routing[i].irq_end)) + return i; + } + + printk(KERN_ERR "ERROR: Unable to locate IOAPIC for IRQ %d/n", irq); + + return -1; +} + + +void __init mp_register_ioapic ( + u8 id, + u32 address, + u32 irq_base) +{ + int idx = 0; + + if (nr_ioapics >= MAX_IO_APICS) { + printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " + "(found %d)\n", MAX_IO_APICS, nr_ioapics); + panic("Recompile kernel with bigger MAX_IO_APICS!\n"); + } + if (!address) { + printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address" + " found in MADT table, skipping!\n"); + return; + } + + idx = nr_ioapics++; + + mp_ioapics[idx].mpc_type = MP_IOAPIC; + mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE; + mp_ioapics[idx].mpc_apicaddr = address; + + set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); + mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); + mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); + + /* + * Build basic IRQ lookup table to facilitate irq->io_apic lookups + * and to prevent reprogramming of IOAPIC pins (PCI IRQs). 
+ */ + mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid; + mp_ioapic_routing[idx].irq_start = irq_base; + mp_ioapic_routing[idx].irq_end = irq_base + + io_apic_get_redir_entries(idx); + + printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, " + "IRQ %d-%d\n", idx, mp_ioapics[idx].mpc_apicid, + mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr, + mp_ioapic_routing[idx].irq_start, + mp_ioapic_routing[idx].irq_end); + + return; +} + + +void __init mp_override_legacy_irq ( + u8 bus_irq, + u8 polarity, + u8 trigger, + u32 global_irq) +{ + struct mpc_config_intsrc intsrc; + int i = 0; + int found = 0; + int ioapic = -1; + int pin = -1; + + /* + * Convert 'global_irq' to 'ioapic.pin'. + */ + ioapic = mp_find_ioapic(global_irq); + if (ioapic < 0) + return; + pin = global_irq - mp_ioapic_routing[ioapic].irq_start; + + /* + * TBD: This check is for faulty timer entries, where the override + * erroneously sets the trigger to level, resulting in a HUGE + * increase of timer interrupts! + */ + if ((bus_irq == 0) && (global_irq == 2) && (trigger == 3)) + trigger = 1; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqtype = mp_INT; + intsrc.mpc_irqflag = (trigger << 2) | polarity; + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_srcbusirq = bus_irq; /* IRQ */ + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */ + intsrc.mpc_dstirq = pin; /* INTIN# */ + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n", + intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq); + + /* + * If an existing [IOAPIC.PIN -> IRQ] routing entry exists we override it. + * Otherwise create a new entry (e.g. global_irq == 2). + */ + for (i = 0; i < mp_irq_entries; i++) { + if ((mp_irqs[i].mpc_dstapic == intsrc.mpc_dstapic) + && (mp_irqs[i].mpc_dstirq == intsrc.mpc_dstirq)) { + mp_irqs[i] = intsrc; + found = 1; + break; + } + } + if (!found) { + mp_irqs[mp_irq_entries] = intsrc; + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); + } + + return; +} + + +void __init mp_config_acpi_legacy_irqs (void) +{ + struct mpc_config_intsrc intsrc; + int i = 0; + int ioapic = -1; + + /* + * Fabricate the legacy ISA bus (bus #31). + */ + mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA; + Dprintk("Bus #%d is ISA\n", MP_ISA_BUS); + + /* + * Locate the IOAPIC that manages the ISA IRQs (0-15). + */ + ioapic = mp_find_ioapic(0); + if (ioapic < 0) + return; + + intsrc.mpc_type = MP_INTSRC; + intsrc.mpc_irqflag = 0; /* Conforming */ + intsrc.mpc_srcbus = MP_ISA_BUS; + intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; + + /* + * Use the default configuration for the IRQs 0-15. These may be + * overriden by (MADT) interrupt source override entries. + */ + for (i = 0; i < 16; i++) { + + if (i == 2) continue; /* Don't connect IRQ2 */ + + intsrc.mpc_irqtype = i ? 
mp_INT : mp_ExtINT; /* 8259A to #0 */ + intsrc.mpc_srcbusirq = i; /* Identity mapped */ + intsrc.mpc_dstirq = i; + + Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, " + "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3, + (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus, + intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, + intsrc.mpc_dstirq); + + mp_irqs[mp_irq_entries] = intsrc; + if (++mp_irq_entries == MAX_IRQ_SOURCES) + panic("Max # of irq sources exceeded!\n"); + } + + return; +} + +#endif /*CONFIG_X86_IO_APIC*/ + +#ifdef CONFIG_ACPI_PCI + +void __init mp_parse_prt (void) +{ + struct list_head *node = NULL; + struct acpi_prt_entry *entry = NULL; + int vector = 0; + int ioapic = -1; + int ioapic_pin = 0; + int irq = 0; + int idx, bit = 0; + + /* + * Parsing through the PCI Interrupt Routing Table (PRT) and program + * routing for all static (IOAPIC-direct) entries. + */ + list_for_each(node, &acpi_prt.entries) { + entry = list_entry(node, struct acpi_prt_entry, node); + + /* We're only interested in static (non-link) entries. */ + if (entry->link.handle) + continue; + + irq = entry->link.index; + ioapic = mp_find_ioapic(irq); + if (ioapic < 0) + continue; + ioapic_pin = irq - mp_ioapic_routing[ioapic].irq_start; + + /* + * Avoid pin reprogramming. PRTs typically include entries + * with redundant pin->irq mappings (but unique PCI devices); + * we only only program the IOAPIC on the first. + */ + bit = ioapic_pin % 32; + idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32); + if (idx > 3) { + printk(KERN_ERR "Invalid reference to IOAPIC pin " + "%d-%d\n", mp_ioapic_routing[ioapic].apic_id, + ioapic_pin); + continue; + } + if ((1<irq = irq; + continue; + } + + mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<irq = irq; + + printk(KERN_DEBUG "%02x:%02x:%02x[%c] -> %d-%d -> vector 0x%02x" + " -> IRQ %d\n", entry->id.segment, entry->id.bus, + entry->id.device, ('A' + entry->pin), + mp_ioapic_routing[ioapic].apic_id, ioapic_pin, vector, + entry->irq); + } + + return; +} + +#endif /*CONFIG_ACPI_PCI*/ + +#endif /*CONFIG_ACPI_BOOT*/ diff -Nru a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c --- a/arch/i386/kernel/setup.c Wed Oct 8 09:08:22 2003 +++ b/arch/i386/kernel/setup.c Wed Oct 8 09:08:22 2003 @@ -95,6 +95,7 @@ #include #include #include +#include #include #ifdef CONFIG_BLK_DEV_RAM #include @@ -174,8 +175,6 @@ static u32 disabled_x86_caps[NCAPINTS] __initdata = { 0 }; extern int blk_nohighio; -int enable_acpi_smp_table; - /* * This is set up by the setup-routine at boot-time */ @@ -1007,10 +1006,15 @@ */ reserve_bootmem(PAGE_SIZE, PAGE_SIZE); #endif - +#ifdef CONFIG_ACPI_SLEEP + /* + * Reserve low memory region for sleep support. + */ + acpi_reserve_bootmem(); +#endif #ifdef CONFIG_X86_LOCAL_APIC /* - * Find and reserve possible boot-time SMP configuration: + * Find and reserve possible boot-time SMP configuration. */ find_smp_config(); #endif @@ -1154,12 +1158,23 @@ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/ #endif paging_init(); + +#ifdef CONFIG_ACPI_BOOT + /* + * Parse the ACPI tables for possible boot-time SMP configuration. + */ + acpi_boot_init(*cmdline_p); +#endif #ifdef CONFIG_X86_LOCAL_APIC /* * get boot-time SMP configuration: */ if (smp_found_config) get_smp_config(); + /* + * Validate APIC configuration and map into corresponding fixmap pages. 
+ */ + init_apic_mappings(); #endif register_memory(max_low_pfn); diff -Nru a/drivers/char/Config.in b/drivers/char/Config.in --- a/drivers/char/Config.in Wed Oct 8 09:08:22 2003 +++ b/drivers/char/Config.in Wed Oct 8 09:08:22 2003 @@ -306,6 +306,7 @@ bool ' ALI chipset support' CONFIG_AGP_ALI bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS if [ "$CONFIG_IA64" = "y" ]; then + bool ' Intel 460GX support' CONFIG_AGP_I460 bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1 fi fi diff -Nru a/drivers/char/Makefile b/drivers/char/Makefile --- a/drivers/char/Makefile Wed Oct 8 09:08:22 2003 +++ b/drivers/char/Makefile Wed Oct 8 09:08:22 2003 @@ -182,6 +182,7 @@ obj-$(CONFIG_HIL) += hp_keyb.o obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o +obj-$(CONFIG_HP_SIMSERIAL) += simserial.o obj-$(CONFIG_ROCKETPORT) += rocket.o obj-$(CONFIG_MOXA_SMARTIO) += mxser.o obj-$(CONFIG_MOXA_INTELLIO) += moxa.o diff -Nru a/drivers/char/agp/agpgart_be.c b/drivers/char/agp/agpgart_be.c --- a/drivers/char/agp/agpgart_be.c Wed Oct 8 09:08:22 2003 +++ b/drivers/char/agp/agpgart_be.c Wed Oct 8 09:08:22 2003 @@ -39,10 +39,14 @@ #include #include #include +#include #include #include #include #include +#include +#include +#include #include #include "agp.h" @@ -207,10 +211,14 @@ agp_bridge.free_by_type(curr); return; } - if (curr->page_count != 0) { - for (i = 0; i < curr->page_count; i++) { - agp_bridge.agp_destroy_page((unsigned long) - phys_to_virt(curr->memory[i])); + if (agp_bridge.cant_use_aperture) { + vfree(curr->vmptr); + } else { + if (curr->page_count != 0) { + for (i = 0; i < curr->page_count; i++) { + agp_bridge.agp_destroy_page((unsigned long) + phys_to_virt(curr->memory[i])); + } } } agp_free_key(curr->key); @@ -219,6 +227,37 @@ MOD_DEC_USE_COUNT; } +#define IN_VMALLOC(_x) (((_x) >= VMALLOC_START) && ((_x) < VMALLOC_END)) + +/* + * Look up and return the pte corresponding to addr. We only do this for + * agp_ioremap'ed addresses. 
+ */ +static pte_t *agp_lookup_pte(unsigned long addr) +{ + pgd_t *dir; + pmd_t *pmd; + pte_t *pte; + + if (!IN_VMALLOC(addr)) + return NULL; + + dir = pgd_offset_k(addr); + pmd = pmd_offset(dir, addr); + + if (pmd) { + pte = pte_offset(pmd, addr); + + if (pte) { + return pte; + } else { + return NULL; + } + } else { + return NULL; + } +} + #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long)) agp_memory *agp_allocate_memory(size_t page_count, u32 type) @@ -253,18 +292,43 @@ MOD_DEC_USE_COUNT; return NULL; } - for (i = 0; i < page_count; i++) { - new->memory[i] = agp_bridge.agp_alloc_page(); - if (new->memory[i] == 0) { - /* Free this structure */ - agp_free_memory(new); + if (agp_bridge.cant_use_aperture) { + void *vmblock; + unsigned long vaddr; + pte_t *pte; + + vmblock = __vmalloc(page_count << PAGE_SHIFT, GFP_KERNEL, PAGE_KERNEL); + if (vmblock == NULL) { + MOD_DEC_USE_COUNT; return NULL; } - new->memory[i] = virt_to_phys((void *) new->memory[i]); - new->page_count++; - } + new->vmptr = vmblock; + vaddr = (unsigned long) vmblock; + + for (i = 0; i < page_count; i++, vaddr += PAGE_SIZE) { + pte = agp_lookup_pte(vaddr); + if (pte == NULL) { + MOD_DEC_USE_COUNT; + return NULL; + } + new->memory[i] = virt_to_phys(page_address(pte_page(*pte))); + } + new->page_count = page_count; + } else { + for (i = 0; i < page_count; i++) { + new->memory[i] = agp_bridge.agp_alloc_page(); + + if (new->memory[i] == 0) { + /* Free this structure */ + agp_free_memory(new); + return NULL; + } + new->memory[i] = virt_to_phys((void *) new->memory[i]); + new->page_count++; + } + } return new; } @@ -305,6 +369,9 @@ /* Routine to copy over information structure */ +/* AGP bridge need not be PCI device, but DRM thinks it is. */ +static struct pci_dev fake_bridge_dev; + int agp_copy_info(agp_kern_info * info) { memset(info, 0, sizeof(agp_kern_info)); @@ -314,7 +381,7 @@ } info->version.major = agp_bridge.version->major; info->version.minor = agp_bridge.version->minor; - info->device = agp_bridge.dev; + info->device = agp_bridge.dev ? agp_bridge.dev : &fake_bridge_dev; info->chipset = agp_bridge.type; info->mode = agp_bridge.mode; info->aper_base = agp_bridge.gart_bus_addr; @@ -388,97 +455,104 @@ /* Generic Agp routines - Start */ -static void agp_generic_agp_enable(u32 mode) +static u32 agp_collect_device_status(u32 mode, u32 command) { - struct pci_dev *device = NULL; - u32 command, scratch; - u8 cap_ptr; - - pci_read_config_dword(agp_bridge.dev, - agp_bridge.capndx + 4, - &command); - - /* - * PASS1: go throu all devices that claim to be - * AGP devices and collect their data. - */ - + struct pci_dev *device; + u8 agp; + u32 scratch; pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) { - /* - * Ok, here we have a AGP device. Disable impossible - * settings, and adjust the readqueue to the minimum. - */ + agp = pci_find_capability(device, PCI_CAP_ID_AGP); + if (!agp) + continue; + + /* + * Ok, here we have a AGP device. Disable impossible + * settings, and adjust the readqueue to the minimum. 
+ */ + pci_read_config_dword(device, agp + PCI_AGP_STATUS, &scratch); - pci_read_config_dword(device, cap_ptr + 4, &scratch); + /* adjust RQ depth */ + command = + ((command & ~0xff000000) | + min_t(u32, (mode & 0xff000000), + min_t(u32, (command & 0xff000000), + (scratch & 0xff000000)))); + + /* disable SBA if it's not supported */ + if (!((command & 0x00000200) && + (scratch & 0x00000200) && + (mode & 0x00000200))) + command &= ~0x00000200; + + /* disable FW if it's not supported */ + if (!((command & 0x00000010) && + (scratch & 0x00000010) && + (mode & 0x00000010))) + command &= ~0x00000010; - /* adjust RQ depth */ - command = - ((command & ~0xff000000) | - min_t(u32, (mode & 0xff000000), - min_t(u32, (command & 0xff000000), - (scratch & 0xff000000)))); - - /* disable SBA if it's not supported */ - if (!((command & 0x00000200) && - (scratch & 0x00000200) && - (mode & 0x00000200))) - command &= ~0x00000200; - - /* disable FW if it's not supported */ - if (!((command & 0x00000010) && - (scratch & 0x00000010) && - (mode & 0x00000010))) - command &= ~0x00000010; - - if (!((command & 4) && - (scratch & 4) && - (mode & 4))) - command &= ~0x00000004; - - if (!((command & 2) && - (scratch & 2) && - (mode & 2))) - command &= ~0x00000002; - - if (!((command & 1) && - (scratch & 1) && - (mode & 1))) - command &= ~0x00000001; - } + if (!((command & 4) && + (scratch & 4) && + (mode & 4))) + command &= ~0x00000004; + + if (!((command & 2) && + (scratch & 2) && + (mode & 2))) + command &= ~0x00000002; + + if (!((command & 1) && + (scratch & 1) && + (mode & 1))) + command &= ~0x00000001; } - /* - * PASS2: Figure out the 4X/2X/1X setting and enable the - * target (our motherboard chipset). - */ - if (command & 4) { + if (command & 4) command &= ~3; /* 4X */ + if (command & 2) + command &= ~5; /* 2X (8X for AGP3.0) */ + if (command & 1) + command &= ~6; /* 1X (4X for AGP3.0) */ + + return command; +} + +static void agp_device_command(u32 command, int agp_v3) +{ + struct pci_dev *device; + int mode; + + mode = command & 0x7; + if (agp_v3) + mode *= 4; + + pci_for_each_dev(device) { + u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP); + if (!agp) + continue; + + printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n", + agp_v3 ? 3 : 2, device->slot_name, mode); + pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command); } - if (command & 2) { - command &= ~5; /* 2X */ - } - if (command & 1) { - command &= ~6; /* 1X */ - } +} + +static void agp_generic_agp_enable(u32 mode) +{ + u32 command; + + pci_read_config_dword(agp_bridge.dev, + agp_bridge.capndx + PCI_AGP_STATUS, + &command); + + command = agp_collect_device_status(mode, command); command |= 0x00000100; pci_write_config_dword(agp_bridge.dev, - agp_bridge.capndx + 8, + agp_bridge.capndx + PCI_AGP_COMMAND, command); - /* - * PASS3: Go throu all AGP devices and update the - * command registers. 
- */ - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) - pci_write_config_dword(device, cap_ptr + 8, command); - } + agp_device_command(command, 0); } static int agp_generic_create_gatt_table(void) @@ -1381,9 +1455,605 @@ #endif /* CONFIG_AGP_I810 */ - #ifdef CONFIG_AGP_INTEL +#ifdef CONFIG_AGP_I460 -#endif /* CONFIG_AGP_I810 */ +/* BIOS configures the chipset so that one of two apbase registers are used */ +static u8 intel_i460_dynamic_apbase = 0x10; + +/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */ +static u8 intel_i460_pageshift = 12; +static u32 intel_i460_pagesize; + +/* Keep track of which is larger, chipset or kernel page size. */ +static u32 intel_i460_cpk = 1; + +/* Structure for tracking partial use of 4MB GART pages */ +static u32 **i460_pg_detail = NULL; +static u32 *i460_pg_count = NULL; + +#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift) +#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT) + +#define I460_SRAM_IO_DISABLE (1 << 4) +#define I460_BAPBASE_ENABLE (1 << 3) +#define I460_AGPSIZ_MASK 0x7 +#define I460_4M_PS (1 << 1) + +#define log2(x) ffz(~(x)) + +static gatt_mask intel_i460_masks[] = +{ + { INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT, 0 } +}; + +static unsigned long intel_i460_mask_memory(unsigned long addr, int type) +{ + /* Make sure the returned address is a valid GATT entry */ + return (agp_bridge.masks[0].mask | (((addr & + ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12)); +} + +static unsigned long intel_i460_unmask_memory(unsigned long addr) +{ + /* Turn a GATT entry into a physical address */ + return ((addr & 0xffffff) << 12); +} + +static int intel_i460_fetch_size(void) +{ + int i; + u8 temp; + aper_size_info_8 *values; + + /* Determine the GART page size */ + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp); + intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12; + intel_i460_pagesize = 1UL << intel_i460_pageshift; + + values = A_SIZE_8(agp_bridge.aperture_sizes); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + + /* Exit now if the IO drivers for the GART SRAMS are turned off */ + if (temp & I460_SRAM_IO_DISABLE) { + printk(KERN_WARNING PFX "GART SRAMS disabled on 460GX chipset\n"); + printk(KERN_WARNING PFX "AGPGART operation not possible\n"); + return 0; + } + + /* Make sure we don't try to create an 2 ^ 23 entry GATT */ + if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) { + printk(KERN_WARNING PFX "We can't have a 32GB aperture with 4KB" + " GART pages\n"); + return 0; + } + + /* Determine the proper APBASE register */ + if (temp & I460_BAPBASE_ENABLE) + intel_i460_dynamic_apbase = INTEL_I460_BAPBASE; + else + intel_i460_dynamic_apbase = INTEL_I460_APBASE; + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + + /* + * Dynamically calculate the proper num_entries and page_order + * values for the define aperture sizes. Take care not to + * shift off the end of values[i].size. 
+ */ + values[i].num_entries = (values[i].size << 8) >> + (intel_i460_pageshift - 12); + values[i].page_order = log2((sizeof(u32)*values[i].num_entries) + >> PAGE_SHIFT); + } + + for (i = 0; i < agp_bridge.num_aperture_sizes; i++) { + /* Neglect control bits when matching up size_value */ + if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) { + agp_bridge.previous_size = + agp_bridge.current_size = (void *) (values + i); + agp_bridge.aperture_size_idx = i; + return values[i].size; + } + } + + return 0; +} + +/* There isn't anything to do here since 460 has no GART TLB. */ +static void intel_i460_tlb_flush(agp_memory * mem) +{ + return; +} + +/* + * This utility function is needed to prevent corruption of the control bits + * which are stored along with the aperture size in 460's AGPSIZ register + */ +static void intel_i460_write_agpsiz(u8 size_value) +{ + u8 temp; + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, &temp); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_AGPSIZ, + ((temp & ~I460_AGPSIZ_MASK) | size_value)); +} + +static void intel_i460_cleanup(void) +{ + aper_size_info_8 *previous_size; + + previous_size = A_SIZE_8(agp_bridge.previous_size); + intel_i460_write_agpsiz(previous_size->size_value); + + if (intel_i460_cpk == 0) { + vfree(i460_pg_detail); + vfree(i460_pg_count); + } +} + + +/* Control bits for Out-Of-GART coherency and Burst Write Combining */ +#define I460_GXBCTL_OOG (1UL << 0) +#define I460_GXBCTL_BWC (1UL << 2) + +static int intel_i460_configure(void) +{ + union { + u32 small[2]; + u64 large; + } temp; + u8 scratch; + int i; + + aper_size_info_8 *current_size; + + temp.large = 0; + + current_size = A_SIZE_8(agp_bridge.current_size); + intel_i460_write_agpsiz(current_size->size_value); + + /* + * Do the necessary rigmarole to read all eight bytes of APBASE. + * This has to be done since the AGP aperture can be above 4GB on + * 460 based systems. + */ + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, + &(temp.small[0])); + pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, + &(temp.small[1])); + + /* Clear BAR control bits */ + agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1); + + pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &scratch); + pci_write_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, + (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC); + + /* + * Initialize partial allocation trackers if a GART page is bigger than + * a kernel page. + */ + if (I460_CPAGES_PER_KPAGE >= 1) { + intel_i460_cpk = 1; + } else { + intel_i460_cpk = 0; + + i460_pg_detail = (void *) vmalloc(sizeof(*i460_pg_detail) * + current_size->num_entries); + i460_pg_count = (void *) vmalloc(sizeof(*i460_pg_count) * + current_size->num_entries); + + for (i = 0; i < current_size->num_entries; i++) { + i460_pg_count[i] = 0; + i460_pg_detail[i] = NULL; + } + } + + return 0; +} + +static int intel_i460_create_gatt_table(void) { + + char *table; + int i; + int page_order; + int num_entries; + void *temp; + unsigned int read_back; + + /* + * Load up the fixed address of the GART SRAMS which hold our + * GATT table. 
+ */ + table = (char *) __va(INTEL_I460_ATTBASE); + + temp = agp_bridge.current_size; + page_order = A_SIZE_8(temp)->page_order; + num_entries = A_SIZE_8(temp)->num_entries; + + agp_bridge.gatt_table_real = (u32 *) table; + agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table), + (PAGE_SIZE * (1 << page_order))); + agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real); + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + read_back = agp_bridge.gatt_table[i - 1]; + + return 0; +} + +static int intel_i460_free_gatt_table(void) +{ + int num_entries; + int i; + void *temp; + unsigned int read_back; + + temp = agp_bridge.current_size; + + num_entries = A_SIZE_8(temp)->num_entries; + + for (i = 0; i < num_entries; i++) { + agp_bridge.gatt_table[i] = 0; + } + + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + read_back = agp_bridge.gatt_table[i - 1]; + + iounmap(agp_bridge.gatt_table); + + return 0; +} + +/* These functions are called when PAGE_SIZE exceeds the GART page size */ + +static int intel_i460_insert_memory_cpk(agp_memory * mem, + off_t pg_start, int type) +{ + int i, j, k, num_entries; + void *temp; + unsigned long paddr; + unsigned int read_back; + + /* + * The rest of the kernel will compute page offsets in terms of + * PAGE_SIZE. + */ + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) { + printk(KERN_WARNING PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + j = pg_start; + while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) { + if (!PGE_EMPTY(agp_bridge.gatt_table[j])) { + return -EBUSY; + } + j++; + } + + for (i = 0, j = pg_start; i < mem->page_count; i++) { + + paddr = mem->memory[i]; + + for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize) + agp_bridge.gatt_table[j] = (unsigned int) + agp_bridge.mask_memory(paddr, mem->type); + } + + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + read_back = agp_bridge.gatt_table[j - 1]; + + return 0; +} + +static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, + int type) +{ + int i; + unsigned int read_back; + + pg_start = I460_CPAGES_PER_KPAGE * pg_start; + + for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * + mem->page_count); i++) + agp_bridge.gatt_table[i] = 0; + + /* + * The 460 spec says we have to read the last location written to + * make sure that all writes have taken effect + */ + read_back = agp_bridge.gatt_table[i - 1]; + + return 0; +} + +/* + * These functions are called when the GART page size exceeds PAGE_SIZE. + * + * This situation is interesting since AGP memory allocations that are + * smaller than a single GART page are possible. The structures i460_pg_count + * and i460_pg_detail track partial allocation of the large GART pages to + * work around this issue. + * + * i460_pg_count[pg_num] tracks the number of kernel pages in use within + * GART page pg_num. i460_pg_detail[pg_num] is an array containing a + * psuedo-GART entry for each of the aforementioned kernel pages. 
The whole + * of i460_pg_detail is equivalent to a giant GATT with page size equal to + * that of the kernel. + */ + +static void *intel_i460_alloc_large_page(int pg_num) +{ + int i; + void *bp, *bp_end; + struct page *page; + + i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * + I460_KPAGES_PER_CPAGE); + if (i460_pg_detail[pg_num] == NULL) { + printk(KERN_WARNING PFX "Out of memory, we're in trouble...\n"); + return NULL; + } + + for (i = 0; i < I460_KPAGES_PER_CPAGE; i++) + i460_pg_detail[pg_num][i] = 0; + + bp = (void *) __get_free_pages(GFP_KERNEL, + intel_i460_pageshift - PAGE_SHIFT); + if (bp == NULL) { + printk(KERN_WARNING PFX "Couldn't alloc 4M GART page...\n"); + return NULL; + } + + bp_end = bp + ((PAGE_SIZE * + (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1); + + for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) { + atomic_inc(&page->count); + set_bit(PG_locked, &page->flags); + atomic_inc(&agp_bridge.current_memory_agp); + } + + return bp; +} + +static void intel_i460_free_large_page(int pg_num, unsigned long addr) +{ + struct page *page; + void *bp, *bp_end; + + bp = (void *) __va(addr); + bp_end = bp + (PAGE_SIZE * + (1 << (intel_i460_pageshift - PAGE_SHIFT))); + + vfree(i460_pg_detail[pg_num]); + i460_pg_detail[pg_num] = NULL; + + for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) { + put_page(page); + UnlockPage(page); + atomic_dec(&agp_bridge.current_memory_agp); + } + + free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT); +} + +static int intel_i460_insert_memory_kpc(agp_memory * mem, + off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned int read_back; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / + I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % + I460_KPAGES_PER_CPAGE; + + if (end_pg > num_entries) { + printk(KERN_WARNING PFX "Looks like we're out of AGP memory\n"); + return -EINVAL; + } + + /* Check if the requested region of the aperture is free */ + for (pg = start_pg; pg <= end_pg; pg++) { + /* Allocate new GART pages if necessary */ + if (i460_pg_detail[pg] == NULL) { + temp = intel_i460_alloc_large_page(pg); + if (temp == NULL) + return -ENOMEM; + agp_bridge.gatt_table[pg] = agp_bridge.mask_memory( + (unsigned long) temp, 0); + read_back = agp_bridge.gatt_table[pg]; + } + + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) + : I460_KPAGES_PER_CPAGE); + idx++) { + if(i460_pg_detail[pg][idx] != 0) + return -EBUSY; + } + } + + for (pg = start_pg, i = 0; pg <= end_pg; pg++) { + paddr = intel_i460_unmask_memory(agp_bridge.gatt_table[pg]); + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? 
(end_offset + 1) + : I460_KPAGES_PER_CPAGE); + idx++, i++) { + mem->memory[i] = paddr + (idx * PAGE_SIZE); + i460_pg_detail[pg][idx] = + agp_bridge.mask_memory(mem->memory[i], mem->type); + + i460_pg_count[pg]++; + } + } + + return 0; +} + +static int intel_i460_remove_memory_kpc(agp_memory * mem, + off_t pg_start, int type) +{ + int i, pg, start_pg, end_pg, start_offset, end_offset, idx; + int num_entries; + void *temp; + unsigned int read_back; + unsigned long paddr; + + temp = agp_bridge.current_size; + num_entries = A_SIZE_8(temp)->num_entries; + + /* Figure out what pg_start means in terms of our large GART pages */ + start_pg = pg_start / I460_KPAGES_PER_CPAGE; + start_offset = pg_start % I460_KPAGES_PER_CPAGE; + end_pg = (pg_start + mem->page_count - 1) / + I460_KPAGES_PER_CPAGE; + end_offset = (pg_start + mem->page_count - 1) % + I460_KPAGES_PER_CPAGE; + + for (i = 0, pg = start_pg; pg <= end_pg; pg++) { + for (idx = ((pg == start_pg) ? start_offset : 0); + idx < ((pg == end_pg) ? (end_offset + 1) + : I460_KPAGES_PER_CPAGE); + idx++, i++) { + mem->memory[i] = 0; + i460_pg_detail[pg][idx] = 0; + i460_pg_count[pg]--; + } + + /* Free GART pages if they are unused */ + if (i460_pg_count[pg] == 0) { + paddr = intel_i460_unmask_memory(agp_bridge.gatt_table[pg]); + agp_bridge.gatt_table[pg] = agp_bridge.scratch_page; + read_back = agp_bridge.gatt_table[pg]; + + intel_i460_free_large_page(pg, paddr); + } + } + + return 0; +} + +/* Dummy routines to call the approriate {cpk,kpc} function */ + +static int intel_i460_insert_memory(agp_memory * mem, + off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_insert_memory_cpk(mem, pg_start, type); + else + return intel_i460_insert_memory_kpc(mem, pg_start, type); +} + +static int intel_i460_remove_memory(agp_memory * mem, + off_t pg_start, int type) +{ + if (intel_i460_cpk) + return intel_i460_remove_memory_cpk(mem, pg_start, type); + else + return intel_i460_remove_memory_kpc(mem, pg_start, type); +} + +/* + * If the kernel page size is smaller that the chipset page size, we don't + * want to allocate memory until we know where it is to be bound in the + * aperture (a multi-kernel-page alloc might fit inside of an already + * allocated GART page). Consequently, don't allocate or free anything + * if i460_cpk (meaning chipset pages per kernel page) isn't set. + * + * Let's just hope nobody counts on the allocated AGP memory being there + * before bind time (I don't think current drivers do)... + */ +static unsigned long intel_i460_alloc_page(void) +{ + if (intel_i460_cpk) + return agp_generic_alloc_page(); + + /* Returning NULL would cause problems */ + return ((unsigned long) ~0UL); +} + +static void intel_i460_destroy_page(unsigned long page) +{ + if (intel_i460_cpk) + agp_generic_destroy_page(page); +} + +static aper_size_info_8 intel_i460_sizes[3] = +{ + /* + * The 32GB aperture is only available with a 4M GART page size. + * Due to the dynamic GART page size, we can't figure out page_order + * or num_entries until runtime. 
+ */ + {32768, 0, 0, 4}, + {1024, 0, 0, 2}, + {256, 0, 0, 1} +}; + +static int __init intel_i460_setup(struct pci_dev *pdev) +{ + agp_bridge.masks = intel_i460_masks; + agp_bridge.aperture_sizes = (void *) intel_i460_sizes; + agp_bridge.size_type = U8_APER_SIZE; + agp_bridge.num_aperture_sizes = 3; + agp_bridge.dev_private_data = NULL; + agp_bridge.needs_scratch_page = FALSE; + agp_bridge.configure = intel_i460_configure; + agp_bridge.fetch_size = intel_i460_fetch_size; + agp_bridge.cleanup = intel_i460_cleanup; + agp_bridge.tlb_flush = intel_i460_tlb_flush; + agp_bridge.mask_memory = intel_i460_mask_memory; + agp_bridge.agp_enable = agp_generic_agp_enable; + agp_bridge.cache_flush = global_cache_flush; + agp_bridge.create_gatt_table = intel_i460_create_gatt_table; + agp_bridge.free_gatt_table = intel_i460_free_gatt_table; + agp_bridge.insert_memory = intel_i460_insert_memory; + agp_bridge.remove_memory = intel_i460_remove_memory; + agp_bridge.alloc_by_type = agp_generic_alloc_by_type; + agp_bridge.free_by_type = agp_generic_free_by_type; + agp_bridge.agp_alloc_page = intel_i460_alloc_page; + agp_bridge.agp_destroy_page = intel_i460_destroy_page; + agp_bridge.suspend = agp_generic_suspend; + agp_bridge.resume = agp_generic_resume; + agp_bridge.cant_use_aperture = 1; + + return 0; + + (void) pdev; /* unused */ +} + +#endif /* CONFIG_AGP_I460 */ #ifdef CONFIG_AGP_INTEL @@ -3022,7 +3692,6 @@ struct pci_dev *device = NULL; u32 command, scratch; u8 cap_ptr; - u8 agp_v3; u8 v3_devs=0; /* FIXME: If 'mode' is x1/x2/x4 should we call the AGPv2 routines directly ? @@ -3055,77 +3724,14 @@ } - pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 4, &command); - - /* - * PASS2: go through all devices that claim to be - * AGP devices and collect their data. - */ - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) { - /* - * Ok, here we have a AGP device. Disable impossible - * settings, and adjust the readqueue to the minimum. - */ - - printk (KERN_INFO "AGP: Setting up AGPv3 capable device at %d:%d:%d\n", - device->bus->number, PCI_FUNC(device->devfn), PCI_SLOT(device->devfn)); - pci_read_config_dword(device, cap_ptr + 4, &scratch); - agp_v3 = (scratch & (1<<3) ) >>3; - - /* adjust RQ depth */ - command = - ((command & ~0xff000000) | - min_t(u32, (mode & 0xff000000), - min_t(u32, (command & 0xff000000), - (scratch & 0xff000000)))); - - /* disable SBA if it's not supported */ - if (!((command & 0x200) && (scratch & 0x200) && (mode & 0x200))) - command &= ~0x200; - - /* disable FW if it's not supported */ - if (!((command & 0x10) && (scratch & 0x10) && (mode & 0x10))) - command &= ~0x10; - - if (!((command & 2) && (scratch & 2) && (mode & 2))) { - command &= ~2; /* 8x */ - printk (KERN_INFO "AGP: Putting device into 8x mode\n"); - } - - if (!((command & 1) && (scratch & 1) && (mode & 1))) { - command &= ~1; /* 4x */ - printk (KERN_INFO "AGP: Putting device into 4x mode\n"); - } - } - } - /* - * PASS3: Figure out the 8X/4X setting and enable the - * target (our motherboard chipset). - */ - - if (command & 2) - command &= ~5; /* 8X */ - - if (command & 1) - command &= ~6; /* 4X */ + pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + PCI_AGP_STATUS, &command); + command = agp_collect_device_status(mode, command); command |= 0x100; - pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx + 8, command); - - /* - * PASS4: Go through all AGP devices and update the - * command registers. 
- */ + pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx + PCI_AGP_COMMAND, command); - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) - pci_write_config_dword(device, cap_ptr + 8, command); - } + agp_device_command(command, 1); } @@ -3682,7 +4288,7 @@ /* Fill in the mode register */ pci_read_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + 4, + agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode); pci_read_config_byte(agp_bridge.dev, @@ -3832,104 +4438,23 @@ static void serverworks_agp_enable(u32 mode) { - struct pci_dev *device = NULL; - u32 command, scratch, cap_id; - u8 cap_ptr; + u32 command; pci_read_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + 4, + agp_bridge.capndx + PCI_AGP_STATUS, &command); - /* - * PASS1: go throu all devices that claim to be - * AGP devices and collect their data. - */ - - - pci_for_each_dev(device) { - cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP); - if (cap_ptr != 0x00) { - do { - pci_read_config_dword(device, - cap_ptr, &cap_id); - - if ((cap_id & 0xff) != 0x02) - cap_ptr = (cap_id >> 8) & 0xff; - } - while (((cap_id & 0xff) != 0x02) && (cap_ptr != 0x00)); - } - if (cap_ptr != 0x00) { - /* - * Ok, here we have a AGP device. Disable impossible - * settings, and adjust the readqueue to the minimum. - */ - - pci_read_config_dword(device, cap_ptr + 4, &scratch); - - /* adjust RQ depth */ - command = - ((command & ~0xff000000) | - min_t(u32, (mode & 0xff000000), - min_t(u32, (command & 0xff000000), - (scratch & 0xff000000)))); - - /* disable SBA if it's not supported */ - if (!((command & 0x00000200) && - (scratch & 0x00000200) && - (mode & 0x00000200))) - command &= ~0x00000200; - - /* disable FW */ - command &= ~0x00000010; + command = agp_collect_device_status(mode, command); - command &= ~0x00000008; - - if (!((command & 4) && - (scratch & 4) && - (mode & 4))) - command &= ~0x00000004; - - if (!((command & 2) && - (scratch & 2) && - (mode & 2))) - command &= ~0x00000002; - - if (!((command & 1) && - (scratch & 1) && - (mode & 1))) - command &= ~0x00000001; - } - } - /* - * PASS2: Figure out the 4X/2X/1X setting and enable the - * target (our motherboard chipset). - */ - - if (command & 4) { - command &= ~3; /* 4X */ - } - if (command & 2) { - command &= ~5; /* 2X */ - } - if (command & 1) { - command &= ~6; /* 1X */ - } + command &= ~0x00000010; /* disable FW */ + command &= ~0x00000008; command |= 0x00000100; pci_write_config_dword(serverworks_private.svrwrks_dev, - agp_bridge.capndx + 8, + agp_bridge.capndx + PCI_AGP_COMMAND, command); - /* - * PASS3: Go throu all AGP devices and update the - * command registers. 
- */
-
-	pci_for_each_dev(device) {
-		cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
-		if (cap_ptr != 0x00)
-			pci_write_config_dword(device, cap_ptr + 8, command);
-	}
+	agp_device_command(command, 0);
 }
 
 static int __init serverworks_setup (struct pci_dev *pdev)
@@ -4009,6 +4534,8 @@
 #define log2(x) ffz(~(x))
 #endif
 
+#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
+
 #define HP_ZX1_IOVA_BASE GB(1UL)
 #define HP_ZX1_IOVA_SIZE GB(1UL)
 #define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
@@ -4023,14 +4550,9 @@
 	{0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
 };
 
-static gatt_mask hp_zx1_masks[] =
-{
-	{HP_ZX1_PDIR_VALID_BIT, 0}
-};
-
 static struct _hp_private {
-	struct pci_dev *ioc;
-	volatile u8 *registers;
+	volatile u8 *ioc_regs;
+	volatile u8 *lba_regs;
 	u64 *io_pdir;	// PDIR for entire IOVA
 	u64 *gatt;	// PDIR just for GART (subset of above)
 	u64 gatt_entries;
@@ -4057,7 +4579,7 @@
 	 * - IOVA space is 1Gb in size
 	 * - first 512Mb is IOMMU, second 512Mb is GART
 	 */
-	hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
+	hp->io_tlb_ps = INREG64(hp->ioc_regs, HP_ZX1_TCNFG);
 	switch (hp->io_tlb_ps) {
 		case 0: hp->io_tlb_shift = 12; break;
 		case 1: hp->io_tlb_shift = 13; break;
@@ -4073,13 +4595,13 @@
 	hp->io_page_size = 1 << hp->io_tlb_shift;
 	hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
 
-	hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
+	hp->iova_base = INREG64(hp->ioc_regs, HP_ZX1_IBASE) & ~0x1;
 	hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
 
 	hp->gart_size = HP_ZX1_GART_SIZE;
 	hp->gatt_entries = hp->gart_size / hp->io_page_size;
 
-	hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
+	hp->io_pdir = phys_to_virt(INREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE));
 	hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
 
 	if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@@ -4093,7 +4615,7 @@
 	return 0;
 }
 
-static int __init hp_zx1_ioc_owner(u8 ioc_rev)
+static int __init hp_zx1_ioc_owner(void)
 {
 	struct _hp_private *hp = &hp_private;
 
@@ -4128,44 +4650,21 @@
 	return 0;
 }
 
-static int __init hp_zx1_ioc_init(void)
+static int __init hp_zx1_ioc_init(u64 ioc_hpa, u64 lba_hpa)
 {
 	struct _hp_private *hp = &hp_private;
-	struct pci_dev *ioc;
-	int i;
-	u8 ioc_rev;
 
-	ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
-	if (!ioc) {
-		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
-		return -ENODEV;
-	}
-	hp->ioc = ioc;
-
-	pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
-
-	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
-		if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
-			hp->registers = (u8 *) ioremap(pci_resource_start(ioc,
-				i),
-				pci_resource_len(ioc, i));
-			break;
-		}
-	}
-	if (!hp->registers) {
-		printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
-
-		return -ENODEV;
-	}
+	hp->ioc_regs = ioremap(ioc_hpa, 1024);
+	hp->lba_regs = ioremap(lba_hpa, 256);
 
 	/*
 	 * If the IOTLB is currently disabled, we can take it over.
 	 * Otherwise, we have to share with sba_iommu.
 	 */
-	hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
+	hp->io_pdir_owner = (INREG64(hp->ioc_regs, HP_ZX1_IBASE) & 0x1) == 0;
 
 	if (hp->io_pdir_owner)
-		return hp_zx1_ioc_owner(ioc_rev);
+		return hp_zx1_ioc_owner();
 
 	return hp_zx1_ioc_shared();
 }
@@ -4185,19 +4684,17 @@
 	struct _hp_private *hp = &hp_private;
 
 	agp_bridge.gart_bus_addr = hp->gart_base;
-	agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
-	pci_read_config_dword(agp_bridge.dev,
-		agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);
+	agp_bridge.mode = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
 
 	if (hp->io_pdir_owner) {
-		OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
+		OUTREG64(hp->ioc_regs, HP_ZX1_PDIR_BASE,
 			virt_to_phys(hp->io_pdir));
-		OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
-		OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
-		OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
-		OUTREG64(hp->registers, HP_ZX1_PCOM,
+		OUTREG64(hp->ioc_regs, HP_ZX1_TCNFG, hp->io_tlb_ps);
+		OUTREG64(hp->ioc_regs, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
+		OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, hp->iova_base | 0x1);
+		OUTREG64(hp->ioc_regs, HP_ZX1_PCOM,
			hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
-		INREG64(hp->registers, HP_ZX1_PCOM);
+		INREG64(hp->ioc_regs, HP_ZX1_PCOM);
 	}
 
 	return 0;
@@ -4208,17 +4705,18 @@
 	struct _hp_private *hp = &hp_private;
 
 	if (hp->io_pdir_owner)
-		OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
-	iounmap((void *) hp->registers);
+		OUTREG64(hp->ioc_regs, HP_ZX1_IBASE, 0);
+	iounmap((void *) hp->ioc_regs);
+	iounmap((void *) hp->lba_regs);
 }
 
 static void hp_zx1_tlbflush(agp_memory * mem)
 {
 	struct _hp_private *hp = &hp_private;
 
-	OUTREG64(hp->registers, HP_ZX1_PCOM,
+	OUTREG64(hp->ioc_regs, HP_ZX1_PCOM,
		hp->gart_base | log2(hp->gart_size));
-	INREG64(hp->registers, HP_ZX1_PCOM);
+	INREG64(hp->ioc_regs, HP_ZX1_PCOM);
 }
 
 static int hp_zx1_create_gatt_table(void)
@@ -4285,11 +4783,6 @@
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
-		CACHE_FLUSH();
-		mem->is_flushed = TRUE;
-	}
-
 	for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
 		unsigned long paddr;
 
@@ -4329,14 +4822,23 @@
 	return HP_ZX1_PDIR_VALID_BIT | addr;
 }
 
-static unsigned long hp_zx1_unmask_memory(unsigned long addr)
+static void hp_zx1_agp_enable(u32 mode)
 {
-	return addr & ~(HP_ZX1_PDIR_VALID_BIT);
+	struct _hp_private *hp = &hp_private;
+	u32 command;
+
+	command = INREG32(hp->lba_regs, HP_ZX1_AGP_STATUS);
+
+	command = agp_collect_device_status(mode, command);
+	command |= 0x00000100;
+
+	OUTREG32(hp->lba_regs, HP_ZX1_AGP_COMMAND, command);
+
+	agp_device_command(command, 0);
 }
 
-static int __init hp_zx1_setup (struct pci_dev *pdev)
+static int __init hp_zx1_setup(u64 ioc_hpa, u64 lba_hpa)
 {
-	agp_bridge.masks = hp_zx1_masks;
 	agp_bridge.dev_private_data = NULL;
 	agp_bridge.size_type = FIXED_APER_SIZE;
 	agp_bridge.needs_scratch_page = FALSE;
@@ -4345,8 +4847,7 @@
 	agp_bridge.cleanup = hp_zx1_cleanup;
 	agp_bridge.tlb_flush = hp_zx1_tlbflush;
 	agp_bridge.mask_memory = hp_zx1_mask_memory;
-	agp_bridge.unmask_memory = hp_zx1_unmask_memory;
-	agp_bridge.agp_enable = agp_generic_agp_enable;
+	agp_bridge.agp_enable = hp_zx1_agp_enable;
 	agp_bridge.cache_flush = global_cache_flush;
 	agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
 	agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
@@ -4357,12 +4858,61 @@
 	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
 	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
 	agp_bridge.cant_use_aperture = 1;
+	agp_bridge.type = HP_ZX1;
 
-	return hp_zx1_ioc_init();
+	fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
+	fake_bridge_dev.device = PCI_DEVICE_ID_HP_ZX1_LBA;
 
-	(void) pdev; /* unused */
+	return hp_zx1_ioc_init(ioc_hpa, lba_hpa);
+}
+
+static int __init acpi_hp_zx1_gart_add(struct acpi_device *device)
+{
+	acpi_handle handle, parent;
+	acpi_status status;
+	acpi_device_info info;
+	u64 lba_hpa, sba_hpa, length;
+
+	status = acpi_hp_csr_space(device->handle, &lba_hpa, &length);
+	if (ACPI_FAILURE(status))
+		return 1;
+
+	/* Look for an enclosing IOC scope and find its CSR space */
+	handle = device->handle;
+	do {
+		status = acpi_get_object_info(handle, &info);
+		if (ACPI_SUCCESS(status)) {
+			/* TBD check _CID also */
+			info.hardware_id[sizeof(info.hardware_id)-1] = '\0';
+			if (!strcmp(info.hardware_id, "HWP0001")) {
+				status = acpi_hp_csr_space(handle, &sba_hpa,
+							   &length);
+				if (ACPI_SUCCESS(status))
+					break;
+				else {
+					printk(KERN_ERR PFX "Detected HP ZX1 "
						"AGP LBA but no IOC.\n");
+					return 1;
+				}
+			}
+		}
+
+		status = acpi_get_parent(handle, &parent);
+		handle = parent;
+	} while (ACPI_SUCCESS(status));
+
+	if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
+		return 1;
+
+	return 0;
 }
 
+static struct acpi_driver acpi_hp_zx1_gart_driver = {
+	.name = "HP ZX1 GART Driver",
+	.ids = "HWP0003",
+	.ops = { .add = acpi_hp_zx1_gart_add },
+};
+
 #endif /* CONFIG_AGP_HP_ZX1 */
 
 /* per-chipset initialization data.
@@ -4558,6 +5108,15 @@
 
 #endif /* CONFIG_AGP_INTEL */
 
+#ifdef CONFIG_AGP_I460
+	{ PCI_DEVICE_ID_INTEL_460GX,
+		PCI_VENDOR_ID_INTEL,
+		INTEL_460GX,
+		"Intel",
+		"460GX",
+		intel_i460_setup },
+#endif
+
 #ifdef CONFIG_AGP_SIS
 	{ PCI_DEVICE_ID_SI_740,
 		PCI_VENDOR_ID_SI,
@@ -4702,15 +5261,6 @@
 		via_generic_setup },
 #endif /* CONFIG_AGP_VIA */
 
-#ifdef CONFIG_AGP_HP_ZX1
-	{ PCI_DEVICE_ID_HP_ZX1_LBA,
-		PCI_VENDOR_ID_HP,
-		HP_ZX1,
-		"HP",
-		"ZX1",
-		hp_zx1_setup },
-#endif
-
 	{ 0, }, /* dummy final entry, always present */
 };
 
@@ -4789,6 +5339,18 @@
 	return -ENODEV;
 }
 
+static int agp_check_supported_device(struct pci_dev *dev) {
+
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE (agp_bridge_info); i++) {
+		if (dev->vendor == agp_bridge_info[i].vendor_id &&
+			dev->device == agp_bridge_info[i].device_id)
+			return 1;
+	}
+
+	return 0;
+}
 
 /* Supported Device Scanning routine */
 
@@ -4797,8 +5359,24 @@
 	struct pci_dev *dev = NULL;
 	u8 cap_ptr = 0x00;
 
-	if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) == NULL)
-		return -ENODEV;
+#ifdef CONFIG_AGP_HP_ZX1
+	int rc;
+
+	rc = acpi_bus_register_driver(&acpi_hp_zx1_gart_driver);
+	if (rc > 0) {
+		printk(KERN_INFO PFX "Detected an HP ZX1 Chipset.\n");
+		return 0;
+	}
+#endif
+
+	/*
+	 * Some systems have multiple host bridges, so
+	 * we can't just use the first one we find.
+	 */
+	do {
+		if ((dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev)) == NULL)
+			return -ENODEV;
+	} while (!agp_check_supported_device(dev));
 
 	agp_bridge.dev = dev;
 
@@ -4958,23 +5536,6 @@
 
 #endif /* CONFIG_AGP_SWORKS */
 
-#ifdef CONFIG_AGP_HP_ZX1
-	if (dev->vendor == PCI_VENDOR_ID_HP) {
-		do {
-			/* ZX1 LBAs can be either PCI or AGP bridges */
-			if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
-				printk(KERN_INFO PFX "Detected HP ZX1 AGP "
					"chipset at %s\n", dev->slot_name);
-				agp_bridge.type = HP_ZX1;
-				agp_bridge.dev = dev;
-				return hp_zx1_setup(dev);
-			}
-			dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev);
-		} while (dev);
-		return -ENODEV;
-	}
-#endif /* CONFIG_AGP_HP_ZX1 */
-
 	/* find capndx */
 	cap_ptr = pci_find_capability(dev, PCI_CAP_ID_AGP);
 	if (cap_ptr == 0x00)
@@ -4983,7 +5544,7 @@
 
 	/* Fill in the mode register */
 	pci_read_config_dword(agp_bridge.dev,
-		agp_bridge.capndx + 4,
+		agp_bridge.capndx + PCI_AGP_STATUS,
		&agp_bridge.mode);
 
 	/* probe for known chipsets */
@@ -5181,7 +5742,8 @@
 
 	inter_module_register("drm_agp", THIS_MODULE, &drm_agp);
 
-	pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power);
+	if (agp_bridge.dev)
+		pm_register(PM_PCI_DEV, PM_PCI_ID(agp_bridge.dev), agp_power);
 
 	return 0;
 }
diff -Nru a/drivers/char/drm/drm_agpsupport.h b/drivers/char/drm/drm_agpsupport.h
--- a/drivers/char/drm/drm_agpsupport.h	Wed Oct 8 09:08:22 2003
+++ b/drivers/char/drm/drm_agpsupport.h	Wed Oct 8 09:08:22 2003
@@ -270,6 +270,7 @@
 	case INTEL_I840: head->chipset = "Intel i840"; break;
 	case INTEL_I845: head->chipset = "Intel i845"; break;
 	case INTEL_I850: head->chipset = "Intel i850"; break;
+	case INTEL_460GX: head->chipset = "Intel 460GX"; break;
 
 	case VIA_GENERIC: head->chipset = "VIA"; break;
 	case VIA_VP3: head->chipset = "VIA VP3"; break;
diff -Nru a/drivers/char/drm-4.0/agpsupport.c b/drivers/char/drm-4.0/agpsupport.c
--- a/drivers/char/drm-4.0/agpsupport.c	Wed Oct 8 09:08:22 2003
+++ b/drivers/char/drm-4.0/agpsupport.c	Wed Oct 8 09:08:22 2003
@@ -30,6 +30,7 @@
 
 #define __NO_VERSION__
 #include "drmP.h"
+#include 
 #include 
 #if LINUX_VERSION_CODE < 0x020400
 #include "agpsupport-pre24.h"
@@ -264,6 +265,7 @@
 #if LINUX_VERSION_CODE >= 0x020400
 	case INTEL_I840: head->chipset = "Intel i840"; break;
 #endif
+	case INTEL_460GX: head->chipset = "Intel 460GX"; break;
 
 	case VIA_GENERIC: head->chipset = "VIA"; break;
 	case VIA_VP3: head->chipset = "VIA VP3"; break;
@@ -298,8 +300,17 @@
 	case SVWRKS_HE: head->chipset = "Serverworks HE"; break;
 	case SVWRKS_LE: head->chipset = "Serverworks LE"; break;
+	case HP_ZX1: head->chipset = "HP ZX1"; break;
+
 	default: head->chipset = "Unknown"; break;
 	}
+#if LINUX_VERSION_CODE <= 0x020408
+	head->cant_use_aperture = 0;
+	head->page_mask = ~(0xfff);
+#else
+	head->cant_use_aperture = head->agp_info.cant_use_aperture;
+	head->page_mask = head->agp_info.page_mask;
+#endif
 	DRM_INFO("AGP %d.%d on %s @ 0x%08lx %ZuMB\n",
		head->agp_info.version.major, head->agp_info.version.minor,
diff -Nru a/drivers/char/drm-4.0/i810_dma.c b/drivers/char/drm-4.0/i810_dma.c
--- a/drivers/char/drm-4.0/i810_dma.c	Wed Oct 8 09:08:22 2003
+++ b/drivers/char/drm-4.0/i810_dma.c	Wed Oct 8 09:08:22 2003
@@ -309,7 +309,7 @@
 	if(dev_priv->ring.virtual_start) {
		drm_ioremapfree((void *) dev_priv->ring.virtual_start,
-				dev_priv->ring.Size);
+				dev_priv->ring.Size, dev);
 	}
 	if(dev_priv->hw_status_page != 0UL) {
		i810_free_page(dev, dev_priv->hw_status_page);
@@ -323,7 +323,8 @@
		for (i = 0; i < dma->buf_count; i++) {
			drm_buf_t *buf = dma->buflist[ i ];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-			drm_ioremapfree(buf_priv->kernel_virtual, buf->total);
+			drm_ioremapfree(buf_priv->kernel_virtual,
+					buf->total, dev);
		}
 	}
 	return 0;
@@ -397,7 +398,7 @@
		*buf_priv->in_use = I810_BUF_FREE;
		buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
-						       buf->total);
+						       buf->total, dev);
 	}
 	return 0;
 }
@@ -434,7 +435,7 @@
 	dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
						   init->ring_start,
-						   init->ring_size);
+						   init->ring_size, dev);
 
 	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
diff -Nru a/drivers/net/Makefile b/drivers/net/Makefile
--- a/drivers/net/Makefile	Wed Oct 8 09:08:22 2003
+++ b/drivers/net/Makefile	Wed Oct 8 09:08:22 2003
@@ -142,6 +142,7 @@
 obj-$(CONFIG_LNE390) += lne390.o 8390.o
 obj-$(CONFIG_NE3210) += ne3210.o 8390.o
 obj-$(CONFIG_NET_SB1250_MAC) += sb1250-mac.o
+obj-$(CONFIG_HP_SIMETH) += simeth.o
 
 obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
diff -Nru a/drivers/net/eepro100.c b/drivers/net/eepro100.c
--- a/drivers/net/eepro100.c	Wed Oct 8 09:08:22 2003
+++ b/drivers/net/eepro100.c	Wed Oct 8 09:08:22 2003
@@ -2387,6 +2387,7 @@
 	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
 	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
diff -Nru a/include/linux/agp_backend.h b/include/linux/agp_backend.h
--- a/include/linux/agp_backend.h	Wed Oct 8 09:08:22 2003
+++ b/include/linux/agp_backend.h	Wed Oct 8 09:08:22 2003
@@ -53,6 +53,7 @@
 	INTEL_I845,
 	INTEL_I850,
 	INTEL_I860,
+	INTEL_460GX,
 	VIA_GENERIC,
 	VIA_VP3,
 	VIA_MVP3,
@@ -122,6 +123,7 @@
 	size_t page_count;
 	int num_scratch_pages;
 	unsigned long *memory;
+	void *vmptr;
 	off_t pg_start;
 	u32 type;
 	u32 physical;